andl $~3,reg; \
movl (reg),reg;
-
ALIGN
restore_all_guest:
ASSERT_INTERRUPTS_DISABLED
testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
- jnz restore_all_vm86
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
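+        # Neither POPL nor LEA writes EFLAGS, so the ZF from the TESTL
+        # above survives to the JNZ below (ADDL would have clobbered it).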
+ leal 4(%esp),%esp
+ jnz .Lrestore_iret_guest
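+        # The GPRs and error_code/entry_vector are off the stack now, so
+        # %esp points at the EIP slot and frame fields are addressed as
+        # UREGS_xxx-UREGS_eip(%esp) from here on.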
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
- testl $2,UREGS_cs(%esp)
- jnz 1f
+ testb $2,UREGS_cs-UREGS_eip(%esp)
+ jnz .Lrestore_sregs_guest
call restore_ring0_guest
- jmp restore_all_vm86
-1:
+ jmp .Lrestore_iret_guest
#endif
-.Lft1: mov UREGS_ds(%esp),%ds
-.Lft2: mov UREGS_es(%esp),%es
-.Lft3: mov UREGS_fs(%esp),%fs
-.Lft4: mov UREGS_gs(%esp),%gs
-restore_all_vm86:
- popl %ebx
- popl %ecx
- popl %edx
- popl %esi
- popl %edi
- popl %ebp
- popl %eax
- addl $4,%esp
+.Lrestore_sregs_guest:
+.Lft1: mov UREGS_ds-UREGS_eip(%esp),%ds
+.Lft2: mov UREGS_es-UREGS_eip(%esp),%es
+.Lft3: mov UREGS_fs-UREGS_eip(%esp),%fs
+.Lft4: mov UREGS_gs-UREGS_eip(%esp),%gs
+.Lrestore_iret_guest:
.Lft5: iret
.section .fixup,"ax"
-.Lfx5: subl $28,%esp
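+        # A fault on any of .Lft1-5 arrives here with the GPRs already
+        # popped, so the frame must be rebuilt before re-entering the
+        # exception path.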
+.Lfx1: subl $28,%esp
pushl 28(%esp) # error_code/entry_vector
movl %eax,UREGS_eax+4(%esp)
        movl %ebp,UREGS_ebp+4(%esp)
        movl %edi,UREGS_edi+4(%esp)
        movl %esi,UREGS_esi+4(%esp)
movl %edx,UREGS_edx+4(%esp)
movl %ecx,UREGS_ecx+4(%esp)
movl %ebx,UREGS_ebx+4(%esp)
-.Lfx1: SET_XEN_SEGMENTS(a)
- movl %eax,%fs
- movl %eax,%gs
sti
popl %esi
pushfl # EFLAGS
.long .Lft2,.Lfx1
.long .Lft3,.Lfx1
.long .Lft4,.Lfx1
- .long .Lft5,.Lfx5
+ .long .Lft5,.Lfx1
.previous
.section __ex_table,"a"
.long .Ldf1,failsafe_callback
ENTRY(hypercall)
subl $4,%esp
FIXUP_RING0_GUEST_STACK
- SAVE_ALL(b)
- sti
+ SAVE_ALL(1f,1f)
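+        # Hypercalls never originate in Xen context, so SAVE_ALL's Xen and
+        # vm86 continuations can share the fall-through label.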
+1: sti
GET_CURRENT(%ebx)
cmpl $NR_hypercalls,%eax
jae bad_hypercall
ALIGN
handle_exception:
FIXUP_RING0_GUEST_STACK
- SAVE_ALL_NOSEGREGS(a)
- SET_XEN_SEGMENTS(a)
- testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
+ SAVE_ALL(1f,2f)
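+        # Interrupted Xen context resumes at 1: (kept in text subsection 1,
+        # off the fast path); vm86 and, by fall-through, ordinary guest
+        # contexts resume at 2:.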
+ .text 1
+ /* Exception within Xen: make sure we have valid %ds,%es. */
+1: mov %ecx,%ds
+ mov %ecx,%es
+ jmp 2f
+ .previous
+2: testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
jz exception_with_ints_disabled
sti # re-enable interrupts
1: xorl %eax,%eax
jmp handle_exception
ENTRY(early_page_fault)
- SAVE_ALL_NOSEGREGS(a)
- movl %esp,%edx
- pushl %edx
+ SAVE_ALL(1f,1f)
+1: movl %esp,%eax
+ pushl %eax
call do_early_page_fault
addl $4,%esp
jmp restore_all_xen
iret
#else
# Save state but do not trash the segment registers!
- # We may otherwise be unable to reload them or copy them to ring 1.
+ pushl $TRAP_nmi<<16
+ SAVE_ALL(.Lnmi_xen,.Lnmi_common)
+.Lnmi_common:
+ movl %esp,%eax
pushl %eax
- SAVE_ALL_NOSEGREGS(a)
-
- # We can only process the NMI if:
- # A. We are the outermost Xen activation (in which case we have
- # the selectors safely saved on our stack)
- # B. DS and ES contain sane Xen values.
- # In all other cases we bail without touching DS-GS, as we have
- # interrupted an enclosing Xen activation in tricky prologue or
- # epilogue code.
- movl UREGS_eflags(%esp),%eax
- movb UREGS_cs(%esp),%al
- testl $(3|X86_EFLAGS_VM),%eax
- jnz continue_nmi
- movl %ds,%eax
- cmpw $(__HYPERVISOR_DS),%ax
- jne defer_nmi
- movl %es,%eax
- cmpw $(__HYPERVISOR_DS),%ax
- jne defer_nmi
-
-continue_nmi:
- SET_XEN_SEGMENTS(d)
- movl %esp,%edx
- pushl %edx
call do_nmi
addl $4,%esp
+ /*
+ * NB. We may return to Xen context with polluted %ds/%es. But in such
+ * cases we have put guest DS/ES on the guest stack frame, which will
+ * be detected by SAVE_ALL(), or we have rolled back restore_guest.
+ */
jmp ret_from_intr
-
-defer_nmi:
- movl $FIXMAP_apic_base,%eax
- # apic_wait_icr_idle()
-1: movl %ss:APIC_ICR(%eax),%ebx
- testl $APIC_ICR_BUSY,%ebx
- jnz 1b
- # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
- movl $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_PHYSICAL | \
- TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
- jmp restore_all_xen
+.Lnmi_xen:
+ /* Check the outer (guest) context for %ds/%es state validity. */
+ GET_GUEST_REGS(%ebx)
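+        /* NB. %ss override: %ds cannot be trusted to be valid here. */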
+ testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
+ mov %ds,%eax
+ mov %es,%edx
+ jnz .Lnmi_vm86
+ /* We may have interrupted Xen while messing with %ds/%es... */
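+        /*
+         * This mirrors the %ds/%es reconciliation in SAVE_ALL(), applied
+         * to the outer guest frame at %ebx rather than our own.
+         */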
+ cmpw %ax,%cx
+ mov %ecx,%ds /* Ensure %ds is valid */
+ cmove UREGS_ds(%ebx),%eax /* Grab guest DS if it wasn't in %ds */
+ cmpw %dx,%cx
+ movl %eax,UREGS_ds(%ebx) /* Ensure guest frame contains guest DS */
+ cmove UREGS_es(%ebx),%edx /* Grab guest ES if it wasn't in %es */
+ mov %ecx,%es /* Ensure %es is valid */
+ movl $.Lrestore_sregs_guest,%ecx
+ movl %edx,UREGS_es(%ebx) /* Ensure guest frame contains guest ES */
+ cmpl %ecx,UREGS_eip(%esp)
+ jbe .Lnmi_common
+ cmpl $.Lrestore_iret_guest,UREGS_eip(%esp)
+ ja .Lnmi_common
+ /* Roll outer context restore_guest back to restoring %ds/%es. */
+ movl %ecx,UREGS_eip(%esp)
+ jmp .Lnmi_common
+.Lnmi_vm86:
+ /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
+ mov %ecx,%ds
+ mov %ecx,%es
+ jmp .Lnmi_common
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
ENTRY(setup_vm86_frame)
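+        # SAVE_ALL loads __HYPERVISOR_DS into %ecx before calling us, so we
+        # can make %ds/%es valid before copying the frame.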
+ mov %ecx,%ds
+ mov %ecx,%es
# Copies the entire stack frame forwards by 16 bytes.
.macro copy_vm86_words count=18
.if \count
#include <asm/asm_defns.h>
#include <public/xen.h>
+#define guestreg(field) ((field)-UREGS_eip+36)
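+# After the PUSHA in restore_ring0_guest, 8 saved GPRs (32 bytes) plus the
+# 4-byte return address separate %esp from the iret frame, hence the +36
+# rebasing from UREGS_eip.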
+
# Upon entry the stack should be the Xen stack and contain:
- # %ss, %esp, EFLAGS, %cs|1, %eip, ERROR, SAVE_ALL, RETURN
+ # %ss, %esp, EFLAGS, %cs|1, %eip, RETURN
# On exit the stack should be %ss:%esp (i.e. the guest stack)
# and contain:
- # EFLAGS, %cs, %eip, ERROR, SAVE_ALL, RETURN
+ # EFLAGS, %cs, %eip, RETURN
ALIGN
ENTRY(restore_ring0_guest)
+ pusha
+
# Point %gs:%esi to guest stack.
-RRG0: movw UREGS_ss+4(%esp),%gs
- movl UREGS_esp+4(%esp),%esi
+RRG0: movw guestreg(UREGS_ss)(%esp),%gs
+ movl guestreg(UREGS_esp)(%esp),%esi
- # Copy EFLAGS...EBX, RETURN from Xen stack to guest stack.
- movl $(UREGS_kernel_sizeof>>2)+1,%ecx
+ # Copy EFLAGS, %cs, %eip, RETURN, PUSHA from Xen stack to guest stack.
+ movl $12,%ecx /* 12 32-bit values */
1: subl $4,%esi
movl -4(%esp,%ecx,4),%eax
RRG1: movl %eax,%gs:(%esi)
loop 1b
-RRG2: andl $~3,%gs:UREGS_cs+4(%esi)
+RRG2: andl $~3,%gs:guestreg(UREGS_cs)(%esi)
movl %gs,%eax
# We need to do this because these registers are not present
# on the guest stack so they cannot be restored by the code in
# restore_all_guest.
-RRG3: mov UREGS_ds+4(%esp),%ds
-RRG4: mov UREGS_es+4(%esp),%es
-RRG5: mov UREGS_fs+4(%esp),%fs
-RRG6: mov UREGS_gs+4(%esp),%gs
+RRG3: mov guestreg(UREGS_ds)(%esp),%ds
+RRG4: mov guestreg(UREGS_es)(%esp),%es
+RRG5: mov guestreg(UREGS_fs)(%esp),%fs
+RRG6: mov guestreg(UREGS_gs)(%esp),%gs
RRG7: movl %eax,%ss
movl %esi,%esp
+ popa
ret
.section __ex_table,"a"
.long RRG0,domain_crash_synchronous
return 0;
}
-#include <asm/asm_defns.h>
-BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
-fastcall void smp_deferred_nmi(struct cpu_user_regs *regs)
-{
- asmlinkage void do_nmi(struct cpu_user_regs *);
- ack_APIC_irq();
- do_nmi(regs);
-}
-
void __init percpu_traps_init(void)
{
struct tss_struct *tss = &doublefault_tss;
/* The hypercall entry vector is only accessible from ring 1. */
_set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
- set_intr_gate(TRAP_deferred_nmi, &deferred_nmi);
-
/*
* Make a separate task for double faults. This will get us debug output if
* we blow the kernel stack.
#define TRAP_alignment_check 17
#define TRAP_machine_check 18
#define TRAP_simd_error 19
-#define TRAP_deferred_nmi 31
/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
#define ASSERT_INTERRUPTS_ENABLED ASSERT_INTERRUPT_STATUS(nz)
#define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
-#define __SAVE_ALL_PRE \
+/*
+ * Saves all register state into an exception/interrupt stack frame.
+ * Returns to the caller at <xen_lbl> if the interrupted context is within
+ * Xen; at <vm86_lbl> if the interrupted context is vm86; or falls through
+ * if the interrupted context is an ordinary guest protected-mode context.
+ * In all cases %ecx contains __HYPERVISOR_DS. %ds/%es are guaranteed to
+ * contain __HYPERVISOR_DS unless control passes to <xen_lbl>, in which case
+ * the caller is responsible for validity of %ds/%es.
+ */
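+/*
+ * Illustrative use (hypothetical labels; handle_exception is a real
+ * caller):
+ *
+ *     SAVE_ALL(.Lxen,.Lcommon)
+ *     jmp  .Lcommon          # PM guest falls through; skip the Xen stub
+ * .Lxen:
+ *     mov  %ecx,%ds          # Xen path must revalidate %ds/%es itself
+ *     mov  %ecx,%es
+ * .Lcommon:
+ *     ...
+ */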
+#define SAVE_ALL(xen_lbl, vm86_lbl) \
cld; \
pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
pushl %ecx; \
pushl %ebx; \
testl $(X86_EFLAGS_VM),UREGS_eflags(%esp); \
- jz 2f; \
- call setup_vm86_frame; \
- jmp 3f; \
- 2:testb $3,UREGS_cs(%esp); \
- jz 1f; \
- mov %ds,UREGS_ds(%esp); \
- mov %es,UREGS_es(%esp); \
- mov %fs,UREGS_fs(%esp); \
- mov %gs,UREGS_gs(%esp); \
- 3:
-
-#define SAVE_ALL_NOSEGREGS(_reg) \
- __SAVE_ALL_PRE \
- 1:
-
-#define SET_XEN_SEGMENTS(_reg) \
- movl $(__HYPERVISOR_DS),%e ## _reg ## x; \
- mov %e ## _reg ## x,%ds; \
- mov %e ## _reg ## x,%es;
-
-#define SAVE_ALL(_reg) \
- __SAVE_ALL_PRE \
- SET_XEN_SEGMENTS(_reg) \
- 1:
+ mov %ds,%edi; \
+ mov %es,%esi; \
+ mov $(__HYPERVISOR_DS),%ecx; \
+ jnz 86f; \
+ .text 1; \
+ 86: call setup_vm86_frame; \
+ jmp vm86_lbl; \
+ .previous; \
+ testb $3,UREGS_cs(%esp); \
+ jz xen_lbl; \
+ /* \
+ * We are the outermost Xen context, but our \
+ * life is complicated by NMIs and MCEs. These \
+ * could occur in our critical section and \
+ * pollute %ds and %es. We have to detect that \
+ * this has occurred and avoid saving Xen DS/ES \
+ * values to the guest stack frame. \
+ */ \
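+        /*                                              \
+         * NB. Moves to/from segment registers never    \
+         * write EFLAGS, so each CMPW result below      \
+         * survives the interleaved MOVs to gate its    \
+         * CMOVE.                                       \
+         */                                             \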
+ cmpw %cx,%di; \
+ mov %ecx,%ds; \
+ mov %fs,UREGS_fs(%esp); \
+ cmove UREGS_ds(%esp),%edi; \
+ cmpw %cx,%si; \
+ mov %edi,UREGS_ds(%esp); \
+ cmove UREGS_es(%esp),%esi; \
+ mov %ecx,%es; \
+ mov %gs,UREGS_gs(%esp); \
+ mov %esi,UREGS_es(%esp)
#ifdef PERF_COUNTERS
#define PERFC_INCR(_name,_idx,_cur) \
STR(x) ":\n\t" \
"pushl $"#v"<<16\n\t" \
STR(FIXUP_RING0_GUEST_STACK) \
- STR(SAVE_ALL(a)) \
- "movl %esp,%eax\n\t" \
+ STR(SAVE_ALL(1f,1f)) "\n\t" \
+ "1:movl %esp,%eax\n\t" \
"pushl %eax\n\t" \
"call "STR(smp_##x)"\n\t" \
"addl $4,%esp\n\t" \
"\n" __ALIGN_STR"\n" \
"common_interrupt:\n\t" \
STR(FIXUP_RING0_GUEST_STACK) \
- STR(SAVE_ALL(a)) \
- "movl %esp,%eax\n\t" \
+ STR(SAVE_ALL(1f,1f)) "\n\t" \
+ "1:movl %esp,%eax\n\t" \
"pushl %eax\n\t" \
"call " STR(do_IRQ) "\n\t" \
"addl $4,%esp\n\t" \